static void svm_relinquish_guest_resources(struct domain *d);
+/* Host save area: one per physical core, set up by start_svm(). */
+struct host_save_area *host_save_area[NR_CPUS] = {0};
static struct asid_pool ASIDpool[NR_CPUS];
/*
void stop_svm(void)
{
u32 eax, edx;
+ int cpu = smp_processor_id();
/* We turn off the EFER_SVME bit. */
rdmsr(MSR_EFER, eax, edx);
eax &= ~EFER_SVME;
wrmsr(MSR_EFER, eax, edx);
+
+ /* Release this core's HSA. */
+ free_host_save_area(host_save_area[cpu]);
+ host_save_area[cpu] = NULL;
printk("AMD SVM Extension is disabled.\n");
}
int start_svm(void)
{
u32 eax, ecx, edx;
-
- /* Xen does not fill x86_capability words except 0. */
+ u32 phys_hsa_lo, phys_hsa_hi;
+ u64 phys_hsa;
+ int cpu = smp_processor_id();
+
+ /* Xen does not fill x86_capability words except 0. */
ecx = cpuid_ecx(0x80000001);
boot_cpu_data.x86_capability[5] = ecx;
eax |= EFER_SVME;
wrmsr(MSR_EFER, eax, edx);
asidpool_init(smp_processor_id());
- printk("AMD SVM Extension is enabled for cpu %d.\n", smp_processor_id());
+ printk("AMD SVM Extension is enabled for cpu %d.\n", cpu);
+
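+ /*
+  * VMRUN saves host state into the HSA and #VMEXIT reloads it, so a
+  * single HSA per physical core suffices for the lifetime of the core;
+  * MSR_K8_VM_HSAVE_PA tells the processor where this core's HSA lives.
+  */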
+ /* Initialize the HSA for this core */
+ host_save_area[cpu] = alloc_host_save_area();
+ if (host_save_area[cpu] == NULL)
+ {
+ printk("Failed to allocate Host Save Area for cpu %d\n", cpu);
+ return 0;
+ }
+ phys_hsa = (u64) virt_to_maddr(host_save_area[cpu]);
+ phys_hsa_lo = (u32) phys_hsa;
+ phys_hsa_hi = (u32) (phys_hsa >> 32);
+ wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
/* Setup HVM interfaces */
hvm_funcs.disable = stop_svm;
ctxt->ds = vmcb->ds.sel;
}
-#if defined (__x86_64__)
-void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v )
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- regs->rip = vmcb->rip;
- regs->rsp = vmcb->rsp;
- regs->rflags = vmcb->rflags;
- regs->cs = vmcb->cs.sel;
- regs->ds = vmcb->ds.sel;
- regs->es = vmcb->es.sel;
- regs->ss = vmcb->ss.sel;
-}
-#elif defined (__i386__)
void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
regs->ds = vmcb->ds.sel;
regs->es = vmcb->es.sel;
regs->ss = vmcb->ss.sel;
+ regs->fs = vmcb->fs.sel;
+ regs->gs = vmcb->gs.sel;
}
-#endif
/* XXX Use svm_load_cpu_guest_regs instead */
-#if defined (__i386__)
void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
vmcb->rflags = regs->eflags;
vmcb->cs.sel = regs->cs;
vmcb->rip = regs->eip;
+
+ vmcb->ds.sel = regs->ds;
+ vmcb->es.sel = regs->es;
+ vmcb->fs.sel = regs->fs;
+ vmcb->gs.sel = regs->gs;
+
if (regs->eflags & EF_TF)
*intercepts |= EXCEPTION_BITMAP_DB;
else
*intercepts &= ~EXCEPTION_BITMAP_DB;
}
-#else /* (__i386__) */
-void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
-
- /* Write the guest register value into VMCB */
- vmcb->rax = regs->rax;
- vmcb->ss.sel = regs->ss;
- vmcb->rsp = regs->rsp;
- vmcb->rflags = regs->rflags;
- vmcb->cs.sel = regs->cs;
- vmcb->rip = regs->rip;
- if (regs->rflags & EF_TF)
- *intercepts |= EXCEPTION_BITMAP_DB;
- else
- *intercepts &= ~EXCEPTION_BITMAP_DB;
-}
-#endif /* !(__i386__) */
int svm_paging_enabled(struct vcpu *v)
{
{
if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
continue;
-#if 0
- /* Memory leak by not freeing this. XXXKAF: *Why* is not per core?? */
- free_host_save_area(v->arch.hvm_svm.host_save_area);
-#endif
destroy_vmcb(&v->arch.hvm_svm);
free_monitor_pagetable(v);
#include <xen/kernel.h>
#include <xen/domain_page.h>
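+/* Per-core host save areas, allocated in start_svm(). */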
+extern struct host_save_area *host_save_area[];
extern int svm_dbg_on;
extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
int oldcore, int newcore);
+extern void set_hsa_to_guest(struct arch_svm_struct *arch_svm);
#define round_pgdown(_p) ((_p)&PAGE_MASK) /* copied from domain.c */
{
int error;
long rc=0;
- struct host_save_area *hsa = NULL;
- u64 phys_hsa;
memset(arch_svm, 0, sizeof(struct arch_svm_struct));
goto err_out;
}
- /*
- * The following code is for allocating host_save_area.
- * Note: We either allocate a Host Save Area per core or per VCPU.
- * However, we do not want a global data structure
- * for HSA per core, we decided to implement a HSA for each VCPU.
- * It will waste space since VCPU number is larger than core number.
- * But before we find a better place for HSA for each core, we will
- * stay will this solution.
- */
-
- if (!(hsa = alloc_host_save_area()))
- {
- printk("Failed to allocate Host Save Area\n");
- rc = -ENOMEM;
- goto err_out;
- }
-
- phys_hsa = (u64) virt_to_maddr(hsa);
- arch_svm->host_save_area = hsa;
- arch_svm->host_save_pa = phys_hsa;
-
+ /* Bind the HSA of the core we are constructing on; the resume path
+  * re-binds it whenever the VCPU is scheduled back in. */
+ set_hsa_to_guest(arch_svm);
arch_svm->vmcb_pa = (u64) virt_to_maddr(arch_svm->vmcb);
- if ((error = load_vmcb(arch_svm, arch_svm->host_save_pa)))
- {
- printk("construct_vmcb: load_vmcb failed: VMCB = %lx\n",
- (unsigned long) arch_svm->host_save_pa);
- rc = -EINVAL;
- goto err_out;
- }
-
if ((error = construct_vmcb_controls(arch_svm)))
{
printk("construct_vmcb: construct_vmcb_controls failed\n");
}
-int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa)
+void set_hsa_to_guest(struct arch_svm_struct *arch_svm)
{
- u32 phys_hsa_lo, phys_hsa_hi;
-
- phys_hsa_lo = (u32) phys_hsa;
- phys_hsa_hi = (u32) (phys_hsa >> 32);
-
- wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
- set_bit(ARCH_SVM_VMCB_LOADED, &arch_svm->flags);
- return 0;
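+ /* Use the HSA of the physical core this VCPU is currently running on. */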
+ arch_svm->host_save_area = host_save_area[smp_processor_id()];
+ arch_svm->host_save_pa = (u64)virt_to_maddr(arch_svm->host_save_area);
}
-
/*
* Resume the guest.
*/
struct hvm_time_info *time_info = &vpit->time_info;
svm_stts(v);
+
+ /* Make sure the HSA is set for the current core: the scheduler may have
+  * moved this VCPU to a different core since it last ran. */
+ set_hsa_to_guest(&v->arch.hvm_svm);
/* pick up the elapsed PIT ticks and re-enable pit_timer */
if ( time_info->first_injected ) {